library(lavaan)
library(qgraph)
library(igraph)
library(GPArotation)
library(mlVAR)
library(graphicalVAR)
library(knitr)
library(gridExtra)
library(Rmisc)
library(psych)
library(rlist)
library(stargazer)
library(data.table)
library(tidyverse)
library(pander)
library(RColorBrewer)
library(animation)
# Mean and SD of a vector of correlations, computed by averaging in
# Fisher-z space and back-transforming to the r metric.
#
#   x : numeric vector of correlations (NAs tolerated; r = +/-1 values,
#       which map to +/-Inf in z-space, are dropped from the average).
# Returns c(mean_r, sd_r).
#
# NOTE(review): back-transforming the *SD* through fisherz2r is not a
# standard transformation (the SD of z-values is not itself a z-value);
# kept as-is for consistency with the reported tables -- confirm intended.
meanSD_r2z2r <- function(x) {
  z <- fisherz(x)
  z[is.infinite(z)] <- NA        # drop r = +/-1 cases
  x_bar <- mean(z, na.rm = TRUE) # T/F replaced with TRUE/FALSE
  x_sd <- sd(z, na.rm = TRUE)
  r_bar <- fisherz2r(x_bar)
  r_sd <- fisherz2r(x_sd)
  return(c(r_bar, r_sd))
}
The data include three waves of experience sampling method data from the Personality and Intimate Relationship Study. Data were previously cleaned to remove data points that did not meet inclusion criteria.
##Load Data
# Read raw ESM data for each wave. tbl_df() is deprecated in dplyr;
# as_tibble() is its documented replacement with identical downstream
# behavior.
wave1_all <- as_tibble(read.csv("~/Box Sync/network/PAIRS/Wave 1/esm_w1_RENAMED.csv"))
wave4_all <- as_tibble(read.csv("~/Box Sync/network/PAIRS/Wave 4/esm_w4_RENAMED_all.csv"))
wave7_all <- as_tibble(read.csv("~/Box Sync/network/PAIRS/Wave 7/esm_w7_RENAMED_all.csv"))
Because the data sets include data that are not being used in this study, we extract the relevant columns (Subject ID, frequency, hour block, day of study, measurement point, and personality items) from the original data frames. Next, we rename the columns for later ease of use and visualization. Finally, because of the small sample size for waves 4 and 7, we merge those data sets.
#Getting necessary columns
#Keeping subject ID and all esm.BFI items
# Wave 1: keep subject ID, the PRO design columns, and every BFI item,
# but drop any column whose name contains ".1." (presumably duplicate
# columns from the export -- TODO confirm against the raw file).
w1 <- dplyr::select(wave1_all, esm.IDnum.w1, esm.PRO01.w1, esm.PRO03.w1,
esm.PRO04.w1, esm.PRO05.w1, dplyr::matches("BFI"),
-dplyr::contains(".1."))
# Waves 4 and 7: same ID / design / BFI selection.
w4 <- dplyr::select(wave4_all, esm.IDnum.w4, esm.PRO01.w4, esm.PRO03.w4,
esm.PRO04.w4, esm.PRO05.w4, matches("BFI"))
w7 <- dplyr::select(wave7_all, esm.IDnum.w7, esm.PRO01.w7, esm.PRO03.w7,
esm.PRO04.w7, esm.PRO05.w7, matches("BFI"))
# Drop two BFI items from wave 7 -- presumably so w7's columns line up
# with w4's before the merge below; verify that is the reason.
w7 <- w7[,!(colnames(w7) %in% c("esm.BFI20.w7", "esm.BFI12.w7"))]
# Readable replacement names for the raw ESM columns. Wave 1 items
# arrive in a different order than waves 4/7, so each layout gets its
# own vector; varnames2 holds short item labels used only for plots.
varnames <- c("SID", "freq", "hourBlock", "day", "beepvar",
              "A_rude", "E_quiet", "C_lazy", "N_relaxed", "N_depressed",
              "E_outgoing", "A_kind", "C_reliable", "N_worried")
varnames_w47 <- c("SID", "freq", "hourBlock", "day", "beepvar",
                  "E_outgoing", "E_quiet", "C_lazy", "C_reliable",
                  "N_worried", "N_relaxed", "N_depressed", "A_rude",
                  "A_kind")
varnames2 <- c("rude", "quiet", "lazy", "relaxed", "depressed",
               "outgoing", "kind", "reliable", "worried")
# Apply the readable names (waves 4 and 7 share the same layout).
colnames(w1) <- varnames
colnames(w4) <- varnames_w47
colnames(w7) <- varnames_w47
# Treat subject IDs as categorical.
w1$SID <- factor(w1$SID)
w4$SID <- factor(w4$SID)
w7$SID <- factor(w7$SID)
# Reorder wave 4/7 columns so item order matches wave 1.
w4 <- w4[, c(varnames, setdiff(names(w4), varnames))]
w7 <- w7[, c(varnames, setdiff(names(w7), varnames))]
# Tag each row with its wave before combining the two small samples.
w4$Wave <- "4"
w7$Wave <- "7"
# Stack waves 4 and 7; all = TRUE keeps rows from both data sets
# (fixes the reassignable shorthand T -> TRUE).
w2 <- merge(w4, w7, all = TRUE)
| Variable | New Name | Description |
|---|---|---|
| esm.IDnum.w1 | SID | numeric variable; identification number |
| esm.BFI37.w1 | A_rude | agreeableness, negative; “During the last hour, how rude were you?” Likert scale from 1 to 5; 1 = Not a lot, 3 = Somewhat, 5 = Very |
| esm.BFI21.w1 | E_quiet | extraversion, negative; “During the last hour, how quiet were you?” Likert scale from 1 to 5; 1 = Not a lot, 3 = Somewhat, 5 = Very |
| esm.BFI23.w1 | C_lazy | conscientiousness, negative; “During the last hour, how lazy were you?” Likert scale from 1 to 5; 1 = Not a lot, 3 = Somewhat, 5 = Very |
| esm.BFI09.w1 | N_relaxed | neuroticism, negative (reverse-scored); “During the last hour, how relaxed were you?” Likert scale from 1 to 5; 1 = Not a lot, 3 = Somewhat, 5 = Very |
| esm.BFI04.w1 | N_depressed | neuroticism, positive; “During the last hour, did you feel ‘depressed, blue’?” Likert scale from 1 to 5; 1 = Not a lot, 3 = Somewhat, 5 = Very |
| esm.BFI36.w1 | E_outgoing | extraversion, positive; “During the last hour, how ‘outgoing, sociable’ were you?” Likert scale from 1 to 5; 1 = Not a lot, 3 = Somewhat, 5 = Very |
| esm.BFI32.w1 | A_kind | agreeableness, positive; “During the last hour, how ‘considerate, kind’ were you?” Likert scale from 1 to 5; 1 = Not a lot, 3 = Somewhat, 5 = Very |
| esm.BFI13.w1 | C_reliable | conscientiousness, positive; “During the last hour, how reliable were you?” Likert scale from 1 to 5; 1 = Not a lot, 3 = Somewhat, 5 = Very |
| esm.BFI19.w1 | N_worried | neuroticism, positive; “During the last hour, how worried were you?” Likert scale from 1 to 5; 1 = Not a lot, 3 = Somewhat, 5 = Very |
To be able to construct individual networks for participants, we ideally need approximately 50 measurement points. However, for current purposes, we will keep all participants who have at least 20 responses, lest we eliminate a large portion of our subjects.
# get frequency count of subject's responses
# NOTE(review): the narrative above says subjects need "at least 20
# responses", but the code excludes only those with fewer than 10 --
# confirm which threshold is intended.
problem_w1 <- plyr::count(w1$SID)
problem_w2 <- plyr::count(w2$SID)
# extract subject IDs of those with < 10 measurement points
# (plyr::count returns columns x = level and freq = count)
problem_w1 <- as.character(problem_w1$x[which(problem_w1$freq < 10)])
problem_w2 <- as.character(problem_w2$x[which(problem_w2$freq < 10)])
# save excluded subjects to a separate data frame (for reporting)
excluded <- w1[which(w1$SID %in% problem_w1),]
excluded_w2 <- w2[which(w2$SID %in% problem_w2),]
# remove excluded subjects
w1 <- w1[!(w1$SID %in% problem_w1),]
w2 <- w2[!(w2$SID %in% problem_w2),]
Participants in the study only answered Agreeableness items if they indicated they were interacting with another person during the hour block previous to responding. To retain those measurement points for use in models later, we fill in gaps using within-person means of Agreeableness items.
# Impute structurally missing Agreeableness items (only asked after a
# reported social interaction) with each subject's own mean. Replaces
# the per-subject loop with a vectorized ave() pass and fixes
# na.rm = T -> TRUE. Subjects with no observed values on an item keep
# NaN, matching the original loop's mean-of-empty behavior.
for (item in c("A_rude", "A_kind")) {
  fill <- ave(w1[[item]], w1$SID,
              FUN = function(v) mean(v, na.rm = TRUE))
  w1[[item]][is.na(w1[[item]])] <- fill[is.na(w1[[item]])]
}
# Same within-person mean imputation of the Agreeableness items for the
# combined wave 4+7 data set (vectorized with ave(); na.rm = TRUE).
for (item in c("A_rude", "A_kind")) {
  fill <- ave(w2[[item]], w2$SID,
              FUN = function(v) mean(v, na.rm = TRUE))
  w2[[item]][is.na(w2[[item]])] <- fill[is.na(w2[[item]])]
}
Below, we within-person center both data sets to aid in interpretation.
# retain cases where all personality data are retained
# (columns 6:14 are the nine personality items)
w1_com <- w1[complete.cases(w1[,c(6:14)]),]
w2_com <- w2[complete.cases(w2[,c(6:14)]),]
# for waves 4 and 7, create a variable that combines wave and day of study
w2_com$waveDay <- paste(w2_com$Wave, w2_com$day, sep = ".")
# reorder data sets by SID, day, and block
w1_com <- w1_com[order(w1_com$SID, w1_com$day, w1_com$hourBlock),]
w2_com <- w2_com[order(w2_com$SID, w2_com$waveDay, w2_com$hourBlock),]
# Number each subject's retained beeps 1..n in chronological order.
# NOTE(review): arrange() after group_by() ignores the grouping unless
# .by_group = TRUE, so these calls re-sort the *whole* frame by
# day/hourBlock and undo the SID ordering above. beepvar3 still counts
# correctly within subject (seq(1, n(), 1) is row_number() per group),
# but confirm the resulting row order is what the centering step below
# expects.
w1_com <- w1_com %>%
group_by(SID) %>%
arrange(day, hourBlock) %>%
mutate(beepvar3 = seq(1, n(), 1))
w2_com <- w2_com %>%
group_by(SID) %>%
arrange(waveDay, hourBlock) %>%
mutate(beepvar3 = seq(1, n(), 1))
# Within-person center the item columns, then re-attach the uncentered
# ID/design columns. ddply/.()/colwise come from plyr, which is only
# attached implicitly as a dependency of Rmisc -- qualify with plyr::
# so the code is explicit and robust to load order. Also T -> TRUE.
# NOTE(review): ddply returns rows grouped in SID level order, while
# w1_com/w2_com were last sorted by day/hourBlock (the grouped arrange
# above ignores groups) -- confirm the cbind-style data.frame() here
# actually aligns rows before trusting the centered values.
w1_centered <- data.frame(
  plyr::ddply(w1_com[, -c(2:5, 15)], plyr::.(SID),
              plyr::colwise(function(x) x - mean(x, na.rm = TRUE))),
  w1_com[, c(2:5, 15)])
w2_centered <- data.frame(
  plyr::ddply(w2_com[, -c(2:5, 15:17)], plyr::.(SID),
              plyr::colwise(function(x) x - mean(x, na.rm = TRUE))),
  w2_com[, c(2:5, 15:17)])
# Make numeric subject IDs for each df because mlVAR won't run for factors #
# (as.numeric() on a factor yields the factor's level index, not the
# original ID value -- the mapping is consistent within each frame)
w1_com$SID2 <- as.numeric(w1_com$SID)
w2_com$SID2 <- as.numeric(w2_com$SID)
w1_centered$SID2 <- as.numeric(w1_centered$SID)
w2_centered$SID2 <- as.numeric(w2_centered$SID)
# Add each subject's mean ("composite") of every item as new
# <item>_comp columns, keeping the raw columns. mutate_each_()/funs()
# are defunct in current dplyr; across() with .names reproduces the
# old var_comp column naming exactly.
w1_com <- w1_com %>%
  group_by(SID) %>%
  mutate(across(all_of(colnames(w1_com)[6:14]), mean,
                .names = "{.col}_comp")) %>%
  ungroup()
w2_com <- w2_com %>%
  group_by(SID) %>%
  mutate(across(all_of(colnames(w2_com)[6:14]), mean,
                .names = "{.col}_comp")) %>%
  ungroup()
# Persist the processed data frames (row.names = FALSE replaces the
# reassignable shorthand F).
write.csv(w1_com, "~/Box Sync/network/PAIRS/Wave 1/esm_w1_networks.csv", row.names = FALSE)
write.csv(w2_com, "~/Box Sync/network/PAIRS/Wave 4 + 7/esm_w47_networks.csv", row.names = FALSE)
Although mlVAR includes both population and subject level effects, it represents subject level effects as deviations from population effects rather than examining unique subject-level patterns. To assess such unique effects, below we construct individual networks for all subjects at each wave.
# get subjects
subs_w1 <- as.character(unique(w1_com$SID))
subs_w2 <- as.character(unique(w2_com$SID))
# Compute each subject's SD of every item (new <item>_sd columns,
# raw columns retained) so zero-variance subjects can be flagged below.
# The deprecated mutate_each()/funs() calls are replaced with across(),
# whose .names argument reproduces the old var_sd naming.
# (Also fixes the comment typo "flat" -> "flag".)
w1_test <- w1_com[,c(1,6:14)] %>%
  group_by(SID) %>%
  mutate(across(everything(), function(x) sd(x, na.rm = TRUE),
                .names = "{.col}_sd"))
w2_test <- w2_com[,c(1,6:14)] %>%
  group_by(SID) %>%
  mutate(across(everything(), function(x) sd(x, na.rm = TRUE),
                .names = "{.col}_sd"))
# Jitter item values in subject-item cells whose within-person SD is 0
# (zero-variance series break the network estimation). Fixes: `if` on a
# 1x1 tibble subset (w1_test[k, j]) is not a valid scalar condition, so
# columns are extracted with [[ ]]; isTRUE() skips NA SDs instead of
# erroring; seq_len() replaces 1:dim(...)[1]. A fresh random amount is
# drawn per cell, as in the original.
for (i in 1:9) {
  sd_col <- i + 10   # <item>_sd column paired with item column i + 1
  item_col <- i + 1
  for (k in seq_len(nrow(w1_test))) {
    if (isTRUE(w1_test[[sd_col]][k] == 0)) {
      w1_test[[item_col]][k] <-
        jitter(as.numeric(w1_test[[item_col]][k]), amount = runif(1, 0, .05))
    }
  }
}
# Same zero-variance jitter pass for the combined wave 4+7 data
# (see the wave-1 loop: [[ ]] extraction, isTRUE() for NA-safe
# comparison, seq_len()).
for (i in 1:9) {
  sd_col <- i + 10
  item_col <- i + 1
  for (k in seq_len(nrow(w2_test))) {
    if (isTRUE(w2_test[[sd_col]][k] == 0)) {
      w2_test[[item_col]][k] <-
        jitter(as.numeric(w2_test[[item_col]][k]), amount = runif(1, 0, .05))
    }
  }
}
For idiographic networks, we estimate a Gaussian graphical model (GGM) variation of the vector autoregressive model (VAR), which estimates a partial correlation network in which correlations represent the correlation between variables after conditioning on all other variables. These models are regularized using a variant of the least absolute shrinkage and selection operator (LASSO), graphical LASSO (glasso). In addition, glasso includes a tuning parameter that can be set to control the sparsity of the network. Different values of the parameter can be chosen to optimize prediction accuracy to minimize an information criterion, such as the Bayesian information criterion (BIC) or the extended BIC (EBIC; Chen & Chen, 2008).
# load final data sets with jitter
# (reads back previously saved jittered data so this chunk can run
# without repeating the jitter loops -- the write step is not shown in
# this file; confirm the CSVs match the loops above)
w1_test <- read.csv("~/Box Sync/network/PAIRS/Wave 1/esm_w1_jittered.csv")
w2_test <- read.csv("~/Box Sync/network/PAIRS/Wave 4 + 7/esm_w47_jittered.csv")
# save subjects to a vector
subs2_w1 <- as.character(unique(w1_test$SID))
subs2_w2 <- as.character(unique(w2_test$SID))
# Fit one subject's regularized graphical VAR over a fixed tuning grid.
#   x    -- one subject's data; only items A_rude:N_worried are used
#   SID  -- subject ID (progress message only)
#   wave -- wave label (progress message only)
# gamma = 0 selects models by plain BIC. The commented-out .005*n /
# .003*n values show tuning was once scaled by series length; the
# now-unused local `n` has been removed. Data are neither scaled nor
# within-person centered here (handled upstream). T/F -> TRUE/FALSE,
# and select() is namespace-qualified to avoid masking surprises.
gVAR_fun <- function(x, SID, wave){
  print(paste(wave, SID))          # progress indicator for batch runs
  gamma <- 0                       # formerly .005 * n
  lambda <- seq(.025, .25, .025)   # formerly .003 * n
  x <- x %>% dplyr::select(A_rude:N_worried)
  fit <-
    graphicalVAR(x, gamma = gamma, maxit.in = 1000, maxit.out = 1000,
                 lambda_beta = lambda, lambda_kappa = lambda,
                 verbose = TRUE, scale = FALSE, centerWithin = FALSE)
  return(fit)
}
# calculate networks for each subject for wave 1 #
# Drop the helper *_sd columns, nest each subject's rows, and fit one
# graphicalVAR per subject. possibly() substitutes NA_real_ when a fit
# errors, so one bad subject does not stop the batch.
gVAR_fit_w1 <- w1_test %>%
select(-contains("sd")) %>%
group_by(SID) %>%
nest() %>%
mutate(gVAR_fit = map(data, possibly(gVAR_fun, NA_real_)))
save(gVAR_fit_w1, file = "~/Box Sync/network/PAIRS/Wave 1/graphicalVAR_allSubs.RData")
# Fit idiographic networks for each combined wave 4+7 subject. Two
# subjects are excluded by ID (presumably failed fits -- confirm).
# FIX: the original filter() call was missing its closing parenthesis,
# which is a syntax error; corrected here.
gVAR_fit_w2 <- w2_test %>%
  select(-contains("sd")) %>%
  filter(!(SID %in% c("10440", "10516"))) %>%
  group_by(SID) %>%
  nest() %>%
  mutate(gVAR_fit = map(data, possibly(gVAR_fun, NA_real_)))
save(gVAR_fit_w2, file = "~/Box Sync/network/PAIRS/Wave 4 + 7/graphicalVAR_allSubs.RData")
# load idiographic networks #
# (restores the nested tibbles saved above: gVAR_fit_w1 and gVAR_fit_w2)
load("~/Box Sync/network/PAIRS/Wave 1/graphicalVAR_allSubs.RData")
load("~/Box Sync/network/PAIRS/Wave 4 + 7/graphicalVAR_allSubs.RData")
# Extract a fit's temporal network (partial directed correlations, PDC)
# as a long data frame with columns (from, type, to, weight).
#   fit -- a graphicalVAR fit object (uses fit$PDC)
#   SID -- unused, but kept in the signature because callers map2()
#          over (fit, SID) pairs
# Deprecated tbl_df() replaced with as_tibble(); the long frame is the
# function's value (the redundant intermediate assignment is dropped).
beta_fun <- function(fit, SID){
  PDC <- fit$PDC
  from <- row.names(PDC)
  as_tibble(PDC) %>%
    mutate(from = from,
           type = "Temporal") %>%
    gather(key = to, value = weight, A_rude:N_worried)
}
# Pull the contemporaneous (partial contemporaneous correlation) matrix
# out of a graphicalVAR fit object.
kappa_mat_fun <- function(fit) {
  fit$PCC
}
# create simple function to extract contemporaneous networks #
# and save them to a formatted long format data frame #
# Alphabetizes rows and columns of fit$PCC, blanks the lower triangle
# and diagonal (so each undirected edge appears exactly once), reshapes
# to long form, and builds a combined "Var1.Var2" edge key.
# NOTE(review): gather(A_kind:N_worried) assumes A_kind and N_worried
# are the alphabetically first/last item columns -- revisit if the item
# set changes. tbl_df() is deprecated (as_tibble() is the replacement).
kappa_long_fun <- function(fit){
PCC <- fit$PCC
PCC <- PCC[,order(colnames(PCC))]
PCC <- PCC[order(rownames(PCC)),]
PCC[lower.tri(PCC, diag = T)] <- NA
vars <- rownames(PCC)
PCC.long <- tbl_df(PCC) %>%
mutate(Var1 = vars,
type = "Contemporaneous") %>%
gather(key = Var2, value = weight, A_kind:N_worried) %>%
filter(!is.na(weight)) %>%
unite(var, Var1, Var2, sep = ".", remove = F)
}
# run extraction functions on each idiographic network #
# For each wave-1 subject with a successful fit: tag the wave and store
# the temporal edge list (beta), the raw PCC matrix (kappa_mat), and
# the long-format contemporaneous edges (kappa) as list-columns.
gVAR_fit_w1 <- gVAR_fit_w1 %>%
filter(!is.na(gVAR_fit)) %>%
mutate(wave = "1",
beta = map2(gVAR_fit, SID, beta_fun),
kappa_mat = map(gVAR_fit, kappa_mat_fun),
kappa = map(gVAR_fit, kappa_long_fun))
# run extraction functions on each idiographic network #
# Same extraction for the combined wave 4+7 fits, labeled wave "2".
gVAR_fit_w2 <- gVAR_fit_w2 %>%
filter(!is.na(gVAR_fit)) %>%
mutate(wave = "2",
beta = map2(gVAR_fit, SID, beta_fun),
kappa_mat = map(gVAR_fit, kappa_mat_fun),
kappa = map(gVAR_fit, kappa_long_fun))
# unnest temporal effects and merge waves #
# NOTE(review): unnest(..., .drop = T) is the pre-tidyr-1.0 interface
# (.drop discards the other list-columns before expanding).
beta_long <- gVAR_fit_w1 %>%
unnest(beta, .drop = T)
beta_long <- gVAR_fit_w2 %>%
unnest(beta, .drop = T) %>%
full_join(beta_long)
# get SIDs from models
SID_w1 <- as.character(unique(gVAR_fit_w1$SID))
SID_w2 <- as.character(unique(gVAR_fit_w2$SID))
#get variable names from models
# NOTE: this overwrites the 14-element varnames vector from the
# renaming step with the 9 item names taken from the networks.
varnames <- unique(beta_long$from)
# find subjects in both waves
w1w2_subs <- unique(SID_w1)[unique(SID_w1) %in% unique(SID_w2)]
# keep the raw list-columns of contemporaneous edges
# (not obviously used again below -- verify before deleting)
kappa_w1 <- gVAR_fit_w1$kappa
kappa_w2 <- gVAR_fit_w2$kappa
# unnest contemporaneous effects and merge waves #
kappa_long <- gVAR_fit_w1 %>%
unnest(kappa, .drop = T)
kappa_long <- gVAR_fit_w2 %>%
unnest(kappa, .drop = T) %>%
full_join(kappa_long)
One way to conceptualize edge stability is to examine the stability of specific edges across all subjects over time. That is, are some edges more stable than others?
# Spearman rank-order correlation between a table's wave-1 ("1") and
# wave-2 ("2") columns, using pairwise-complete observations.
cor_fun <- function(x) {
  cor(x$`1`, x$`2`, use = "pairwise", method = "spearman")
}
# create rank variable for each edge #
# Within each wave and directed edge (from -> to), rank the shared
# subjects by edge weight (dense_rank, descending). x = -1/+1 codes
# wave numerically (appears unused downstream -- verify).
beta_long <- beta_long %>%
select(SID, wave, type, from, to, weight) %>%
dplyr::filter(SID %in% w1w2_subs) %>%
group_by(wave, from, to) %>%
mutate(rank = dense_rank(desc(weight))) %>%
ungroup() %>%
mutate(x = ifelse(wave == "1", -1, 1))
# calculate rank-order correlations across waves for each edge #
# spread() puts the two waves' ranks side by side (columns `1`, `2`)
# so cor_fun can correlate them per edge.
beta_cors_long <- beta_long %>%
select(-x, -weight) %>%
spread(key = wave, value = rank) %>%
group_by(from, to) %>%
nest() %>%
mutate(r = map_dbl(data, cor_fun))
# unnest rank-order correlations #
beta_cors_long.df <- beta_cors_long %>% unnest(r, .drop = T)
# Average (Fisher-z pooled) of the per-edge rank-order correlations.
# FIX: these are *temporal* (PDC) edge correlations; the original
# caption said "Contemporaneous", an apparent copy-paste from the PCC
# section below.
kable(beta_cors_long.df %>%
        summarize(mean = meanSD_r2z2r(r)[1],
                  sd = meanSD_r2z2r(r)[2]),
      caption = "Average Idiographic Partial Directed (Temporal) Correlations")
| mean | sd |
|---|---|
| 0.0188947 | 0.0897663 |
# Heatmap of the per-edge rank-order correlations across waves for the
# temporal (PDC) networks. The constant `measure` column is kept for
# the facet strip label; the original piped mutate(measure = ...) into
# filter(measure == ...), which keeps every row -- that no-op filter is
# removed. Axis factors are ordered so rows/columns read alphabetically.
PDC_cors_heatmap <- beta_cors_long.df %>%
  mutate(measure = "Rank-Order") %>%
  mutate(from = factor(from, levels = sort(unique(from))),
         to = factor(to, levels = rev(sort(unique(to))))) %>%
  ggplot(aes(x = from, y = to, fill = r)) +
  geom_tile() +
  scale_fill_gradient2(low = "blue", high = "red", mid = "white",
                       midpoint = 0, limit = c(-.5, .5), space = "Lab",
                       name = "Rank-Order\nCorrelations") +
  geom_text(aes(label = round(r, 2))) +
  facet_grid(. ~ measure) +
  labs(x = "From", y = "To") +
  theme_classic() +
  theme(axis.text = element_text(face = "bold"),
        axis.title = element_text(face = "bold"),
        strip.text = element_text(face = "bold"),
        legend.title = element_text(face = "bold"))
ggsave(file = "~/Box Sync/network/PAIRS/Brown Bag Presentation 3.31/PDC_Edge_Cors_heatmap.png", width = 9, height = 6)
# Build the 81 directed edge labels "from.to" (9 x 9 items). rep() with
# each/times reproduces the original nested-loop order (outer index
# varying slowest) without the element-by-element fill.
vars <- paste(rep(varnames[1:9], each = 9),
              rep(varnames[1:9], times = 9), sep = ".")
# Spaghetti plot of every shared subject's rank for one temporal edge
# at both waves, connected by per-subject lines.
#   x -- edge label "from.to", e.g. "N_relaxed.N_worried"
# Changes from the original: the no-op recode(wave, `1` = "1",
# `2` = "2") is dropped, and labs(title = ...) receives
# unique(data$V1V2) -- a single string -- rather than the whole
# recycled column.
draw.a.plot <- function(x){
  var <- separate(as.data.frame(x), x, into = c("from", "to"), sep = "[.]")
  data <- beta_long %>%
    filter(from == var$from & to == var$to &
             SID %in% w1w2_subs) %>%
    mutate(V1V2 = paste(from, to, sep = " -> "))
  ggplot(data, aes(x = as.numeric(wave), y = rank, group = factor(SID))) +
    geom_line(aes(color = factor(SID)), size = .3) +
    scale_x_continuous(limits = c(.9, 2.1), breaks = 1:2) +
    geom_point(size = .5) +
    labs(x = "Wave", y = "Rank", color = "Subject",
         title = unique(data$V1V2)) +
    theme_bw() +
    theme(axis.text.y = element_blank(),
          axis.ticks.y = element_blank(),
          panel.grid.major = element_blank(),
          plot.title = element_text(hjust = .5, face = "bold"),
          axis.title = element_text(face = "bold"),
          axis.text = element_text(face = "bold"),
          legend.title = element_text(face = "bold"),
          legend.position = "none")
}
# plot sample edge ranks #
draw.a.plot("N_relaxed.N_worried")
ggsave("~/Box Sync/network/PAIRS/Brown Bag Presentation 3.31/PDC_vars_denserank.png", width = 5, height = 8)
#set up function to loop through the draw.a.plot() function
loop.animate <- function() {
lapply(vars, function(i) {
print(draw.a.plot(i))
})
}
#create GIF of all edges
# (one frame per directed edge, 0.5 s apart)
saveGIF(loop.animate(), interval = .5, movie.name="PDC_vars_denserank.gif", ani.width = 250, ani.height = 400,
imgdir = "~/Box Sync/network/PAIRS/Brown Bag 3.31/")
# print the heatmap built above
PDC_cors_heatmap
# Re-declaration of the Spearman helper used in the temporal section
# (identical behavior): rank-order correlation of the wave-1 vs wave-2
# columns with pairwise-complete observations.
cor_fun <- function(x) {
  cor(x$`1`, x$`2`, use = "pairwise", method = "spearman")
}
# create rank variable #
# Same ranking idea as the temporal section, but per undirected edge
# (Var1, Var2). NOTE(review): min_rank() is used here vs dense_rank()
# for the temporal edges -- confirm the inconsistency is intentional.
kappa_long <- kappa_long %>%
select(SID, wave, type, var, Var1, Var2, weight) %>%
dplyr::filter(SID %in% w1w2_subs) %>%
group_by(wave, Var1, Var2) %>%
mutate(rank = min_rank(desc(weight))) %>%
ungroup() %>%
mutate(x = ifelse(wave == "1", -1, 1))
# calculate rank order correlations #
# map() (not map_dbl() as above) makes r a list-column, hence the
# unnest step that follows.
kappa_cors_long <- kappa_long %>%
select(-x, -weight) %>%
spread(key = wave, value = rank) %>%
group_by(Var1, Var2) %>%
nest() %>%
mutate(r = map(data, cor_fun))
# Flatten the per-edge rank-order correlations to a plain data frame.
# FIX: the original mutate() carried a stray unnamed argument
# ("Absolute Change") that silently created a junk constant column
# named `"Absolute Change"`; it is removed. .drop = TRUE is the
# pre-tidyr-1.0 unnest interface, kept for consistency with the rest
# of the script (T -> TRUE).
kappa_cors_long.df <- kappa_cors_long %>% unnest(r, .drop = TRUE) %>%
  mutate(measure = "Rank-Order")
# table of average Contemporaneous Rank-Order Correlations #
# (Fisher-z pooled across edges via meanSD_r2z2r)
kable(kappa_cors_long.df %>%
group_by(measure) %>%
summarize(mean = meanSD_r2z2r(r)[1],
sd = meanSD_r2z2r(r)[2]),
caption = "Average Idiographic Partial Contemporaneous Rank-Order Correlations")
| measure | mean | sd |
|---|---|---|
| Rank-Order | 0.0968178 | 0.1131593 |
# plot heatmap of contemporaneous rank-order correlations #
# One tile per undirected edge; only the upper triangle exists because
# kappa_long_fun blanked the lower triangle and diagonal.
PCC_cors_heatmap <- kappa_cors_long.df %>%
filter(measure == "Rank-Order") %>%
mutate(Var2 = factor(Var2, levels = rev(sort(unique(Var2))))) %>%
ggplot(aes(x = Var1, y = Var2, fill = r)) +
geom_raster() +
scale_fill_gradient2(low = "blue", high = "red", mid = "white",
midpoint = 0, limit = c(-.5,.5), space = "Lab",
name="Edge Stability\nCorrelations") +
geom_text(aes(label = round(r,2))) +
#geom_text(data = filter(PCC_cors_long.df, measure == "Absolute Change"), aes(label = round(sd,2))) +
facet_grid(.~measure) +
labs(x = "From", y = "To") +
theme_classic() +
theme(strip.text = element_text(face = "bold"),
axis.text = element_text(face = "bold"),
axis.title = element_text(face = "bold"),
legend.title = element_text(face = "bold"),
legend.text = element_text(face = "bold"))
ggsave(file = "~/Box Sync/network/PAIRS/Brown Bag Presentation 3.31/PCC_Edge_Cors_heatmap.png", width = 9, height = 6)
# create a function for plotting contemporaneous ranks #
# x is a space-separated edge label, e.g. "N_relaxed N_worried"
# (note: the temporal version uses a "." separator instead).
# NOTE(review): rank is recomputed here with dense_rank() even though
# kappa_long already stores min_rank() ranks -- confirm intentional.
# NOTE(review): labs(title = data$V1V2) passes a whole column; consider
# unique(data$V1V2) to pass a single string.
draw.a.plot <- function(x){
var <- separate(as.data.frame(x), x, into = c("Var1", "Var2"), sep = "[ ]")
data <- kappa_long %>%
filter(Var1 == var$Var1 & Var2 == var$Var2 &
SID %in% w1w2_subs) %>%
mutate(V1V2 = paste(Var1, Var2, sep = " - ")) %>%
group_by(wave, V1V2) %>%
mutate(rank = dense_rank(desc(weight)))
ggplot(data, aes(x = as.numeric(wave), y = rank, group = factor(SID))) +
geom_line(aes(color = factor(SID)), size = .3) +
scale_x_continuous(limits = c(.9,2.1), breaks = 1:2) +
#scale_color_manual(values = c("indianred1", "black")) +
geom_point(size = .5) +
labs(x = "Wave", y = "Rank", color = "Subject",
title = data$V1V2) +
theme_bw() +
theme(axis.text.y = element_blank(),
axis.ticks.y = element_blank(),
panel.grid.major = element_blank(),
plot.title = element_text(hjust = .5, face = "bold"),
axis.title = element_text(face = "bold"),
axis.text = element_text(face = "bold"),
legend.title = element_text(face = "bold"),
legend.position = "none")
}
# plot sample edge ranks #
draw.a.plot("N_relaxed N_worried")
ggsave("~/Box Sync/network/PAIRS/Brown Bag Presentation 3.31/PCC_vars_denserank.png", width = 5, height = 8)
#set up function to loop through the draw.a.plot() function
# (one GIF frame per undirected edge in kappa_long)
loop.animate <- function() {
lapply(unique(kappa_long$var), function(i) {
print(draw.a.plot(i))
})
}
saveGIF(loop.animate(), interval = .5, movie.name="PCC_vars_denserank.gif", ani.width = 250, ani.height = 400,
imgdir = "~/Box Sync/network/PAIRS/Brown Bag 3.31/")
# print the heatmap built above
PCC_cors_heatmap
Another way to conceptualize stability is to assess a single person’s stability over time. Below, we take profile correlations of all edges for each subject individually for both temporal and contemporaneous effects. The results are then displayed in a histogram.
# calculate profile correlations of temporal effects #
# For each subject present at both waves: correlate their full profile
# of temporal edge weights at wave 1 against the same profile at wave 2.
ip_beta_cors <- beta_long %>%
dplyr::filter(SID %in% w1w2_subs) %>%
arrange(wave, SID, from, to) %>%
select(-x, -rank) %>%
spread(wave, weight) %>%
group_by(SID) %>%
summarize(cors = cor(`1`, `2`, use = "pairwise.complete.obs")) %>%
mutate(type = "PDC")
# calculate profile correlations of contemporaneous effects #
# merge with temporal effects #
ip_cors <- kappa_long %>%
dplyr::filter(SID %in% w1w2_subs) %>%
arrange(wave, SID, var) %>%
select(-x, -rank) %>%
spread(wave, weight) %>%
group_by(SID) %>%
summarize(cors = cor(`1`, `2`, use = "pairwise.complete.obs")) %>%
mutate(type = "PCC") %>%
full_join(ip_beta_cors)
# save network type #
# (type2 gives the human-readable facet labels used in the plots)
ip_cors$type <- factor(ip_cors$type, levels = c("PDC", "PCC"))
ip_cors$type2 <- factor(ifelse(ip_cors$type == "PDC", "Temporal", "Contemporaneous"),
levels = c("Temporal", "Contemporaneous"))
# create a table of average profile correlations #
kable(ip_cors %>%
group_by(type2) %>%
summarize(mean_cor = meanSD_r2z2r(cors)[1],
sd_cor = meanSD_r2z2r(cors)[2]), caption = "Average Ipsative Correlations")
| type2 | mean_cor | sd_cor |
|---|---|---|
| Temporal | 0.0284360 | 0.1464810 |
| Contemporaneous | 0.6364959 | 0.4082291 |
# create table of profile correlations for subject 90 #
# (example subject highlighted in the accompanying text)
kable(ip_cors %>%
filter(SID == "90"), caption = "Ipsative Correlation for Subject 90")
| SID | cors | type | type2 |
|---|---|---|---|
| 90 | 0.184547 | PCC | Contemporaneous |
| 90 | -0.046542 | PDC | Temporal |
# create histogram of profile correlations #
# (faceted by network type: temporal vs contemporaneous)
ggplot(ip_cors, aes(x = cors)) +
geom_histogram(color = "black", fill = "gray") +
labs(x = "Ipsative Correlations", y = "Frequency",
title = "Ipsative Correlations of graphicalVAR Edge Weights") +
xlim(c(-1, 1)) +
facet_grid(.~type2) +
theme_bw()
ggsave("~/Box Sync/network/PAIRS/manuscript/ipsative_cor.png", width = 6, height = 3)
# create histogram of temporal profile correlations #
# (temporal-only version of the faceted histogram above)
PDC_ip_cors <- ip_cors %>%
filter(type2 == "Temporal") %>%
ggplot(aes(x = cors)) +
geom_histogram(color = "black", fill = "gray") +
labs(x = "Ipsative Correlations", y = "Frequency",
title = "Ipsative Correlations of graphicalVAR Edge Weights") +
xlim(c(-1, 1)) +
facet_grid(.~type2) +
theme_bw() +
theme(plot.title = element_text(hjust = .5))
# create a function for plotting subjects' profiles of edges #
# For one subject, draw their full temporal edge-weight profile at both
# waves as overlaid line plots (one line per wave).
#   sub -- subject ID as a character string
draw.a.plot <- function(sub){
data <- beta_long %>%
filter(SID == sub) %>%
mutate(V1V2 = paste(from, to, sep = " -> "))
ggplot(data, aes(x = V1V2, y = weight, group = wave)) +
geom_line(aes(color = wave), size = 1.2) +
scale_y_continuous(limits = c(-1, 1), breaks = seq(-1,1,.5)) +
#geom_point(size = .5) +
scale_color_manual(values = c("orchid", "black")) +
labs(x = NULL, y = "Edge Weight", color = "Wave", title = sprintf("Subject %s", sub)) +
geom_hline(aes(yintercept = 0), linetype = "dashed") +
coord_flip() +
theme_bw() +
theme(axis.text.y = element_text(size = rel(.7), face = "bold"),
axis.text.x = element_text(face = "bold"),
axis.title = element_text(face = "bold"),
legend.title = element_text(face = "bold"),
panel.grid.major = element_blank(),
panel.grid.minor.x = element_blank(),
plot.title = element_text(hjust = .5))
}
# sample profile plot for subject 90 #
draw.a.plot("90")
ggsave("~/Box Sync/network/PAIRS/Brown Bag Presentation 3.31/PDC_EW_ipsative.png", width = 5, height = 8)
#set up function to loop through the draw.a.plot() function
# (subjects 10 through 30 of the shared-subject list only)
loop.animate <- function() {
lapply(as.character(w1w2_subs[10:30]), function(i) {
print(draw.a.plot(i))
})
}
saveGIF(loop.animate(), interval = .5, movie.name="PDC_EW_ipsative.gif", ani.width = 300, ani.height = 450,
imgdir = "~/Box Sync/network/PAIRS/Brown Bag 3.31/")
# create histogram of contemporaneous profile correlations #
# (contemporaneous-only version of the faceted histogram above)
PCC_ip_cors <- ip_cors %>%
filter(type2 == "Contemporaneous") %>%
ggplot(aes(x = cors)) +
geom_histogram(color = "black", fill = "gray") +
labs(x = "Ipsative Correlations", y = "Frequency",
title = "Ipsative Correlations of graphicalVAR Edge Weights") +
xlim(c(-1, 1)) +
facet_grid(.~type2) +
theme_bw() +
theme(plot.title = element_text(hjust = .5))
# create a function to plot subjects' contemporaneous profiles #
# Contemporaneous analogue of the temporal profile plot: one subject's
# full edge-weight profile at both waves, overlaid.
#   z -- subject ID as a character string
draw.a.plot <- function(z){
data <- kappa_long %>%
filter(SID == z) %>%
mutate(V1V2 = paste(Var1, Var2, sep = " - "))
ggplot(data, aes(x = V1V2, y = weight, group = wave)) +
geom_line(aes(color = wave), size = 1.2) +
scale_color_manual(values = c("orchid", "black")) +
scale_y_continuous(limits = c(-1, 1), breaks = seq(-1,1,.5)) +
geom_hline(aes(yintercept = 0), linetype = "dashed") +
#geom_point(size = .5) +
labs(x = NULL, y = "Edge Weight", color = "Wave", title = sprintf("Subject %s", z)) +
coord_flip() +
theme_bw() +
theme(axis.text.y = element_text(size = rel(.7), face = "bold"),
axis.text.x = element_text(face = "bold"),
axis.title = element_text(face = "bold"),
legend.title = element_text(face = "bold"),
panel.grid.major = element_blank(),
panel.grid.minor.x = element_blank(),
plot.title = element_text(hjust = .5))
}
# sample plot for subject 90 #
draw.a.plot("90")
ggsave("~/Box Sync/network/PAIRS/Brown Bag Presentation 3.31/PCC_EW_ipsative.png", width = 5, height = 8)
#set up function to loop through the draw.a.plot() function
# (same subject subset as the temporal GIF: shared subjects 10 to 30)
loop.animate <- function() {
lapply(as.character(w1w2_subs[10:30]), function(i) {
print(draw.a.plot(i))
})
}
saveGIF(loop.animate(), interval = .5, movie.name="PCC_ipsative.gif", ani.width = 300, ani.height = 450,
imgdir = "~/Box Sync/network/PAIRS/Brown Bag 3.31/")
As with between-person effects, we can calculate centrality for individuals.
# save variable names #
# NOTE(review): if varnames currently holds the 9 item names from
# unique(beta_long$from), dropping elements 10:13 is a no-op
# (out-of-range negative indices are silently ignored). If this chunk
# instead runs right after the renaming step, varnames has 14 entries
# and this keeps entries 1:9 and 14 -- confirm the intended result.
varnames <- varnames[-c(10:13)]
# create function to save both centrality measure and variable
# names to a data frame for temporal networks #
# x: one subject's long temporal edge list (from, to, weight).
# Relies on centrality_auto() returning nodes in the same order as the
# global `varnames` vector -- TODO confirm.
centrality_fun <- function(x) {
data <- x %>%
select(from, to, weight) %>%
mutate(weight = as.numeric(weight))
centrality <- centrality_auto(data.frame(data))
df <- data.frame(var = varnames,
centrality$node.centrality)
}
# create function to save both centrality measure and variable
# names to a data frame for contemporaneous networks #
# x: a subject's PCC matrix (from kappa_mat_fun). Assumes the node rows
# returned by centrality_auto() follow the order of the global
# `varnames` vector -- TODO confirm.
kappa_cen_fun <- function(x){
centrality <- centrality_auto(x)$node.centrality
centrality$var <- varnames
return(centrality)
}
# calculate centrality for each subject for each wave and save them to a list #
gVAR_fit_w1 <- gVAR_fit_w1 %>%
mutate(beta_centrality = map(beta, centrality_fun),
kappa_centrality = map(kappa_mat, kappa_cen_fun))
gVAR_fit_w2 <- gVAR_fit_w2 %>%
mutate(beta_centrality = map(beta, centrality_fun),
kappa_centrality = map(kappa_mat, kappa_cen_fun))
# unnest and merge temporal centrality #
beta_centrality <- gVAR_fit_w1 %>%
unnest(beta_centrality)
beta_centrality <- gVAR_fit_w2 %>%
unnest(beta_centrality) %>%
full_join(beta_centrality) %>%
mutate(type = "Temporal") %>%
select(SID, wave, type, var, Betweenness:OutStrength)
# unnest and merge contemporaneous centrality #
# (Degree is dropped; the contemporaneous networks report Strength)
kappa_centrality <- gVAR_fit_w1 %>%
unnest(kappa_centrality)
kappa_centrality <- gVAR_fit_w2 %>%
unnest(kappa_centrality) %>%
full_join(kappa_centrality) %>%
select(-Degree) %>%
mutate(type = "Contemporaneous") %>%
select(SID, wave, type, var, Betweenness:Strength)
# cache both centrality tables, then restore them (allows re-running
# later chunks without refitting)
save(kappa_centrality, beta_centrality,
file = "~/Box Sync/network/PAIRS/centrality/graphicalVAR_centrality.RData")
load("~/Box Sync/network/PAIRS/centrality/graphicalVAR_centrality.RData")
# create a function to plot centrality for individual subjects #
# Draws one subject's standardized centrality (z) per node, one facet
# per type/measure, with waves distinguished by linetype and shape.
#   x     -- subject ID
#   type2 -- network type label ("Temporal" or "Contemporaneous")
# NOTE(review): reads the global centrality_all, which is created in a
# later chunk -- confirm the intended chunk execution order.
centrality_Plot_fun <- function(x, type2){
dat <- centrality_all %>%
filter(SID == x, type == type2) %>%
arrange(measure, wave)
dat %>%
ggplot(aes(x = var, y = z, group = wave))+
geom_line(aes(linetype = wave), color = "black", size = .3) +
geom_point(aes(shape = wave), size = 2) +
labs(x = NULL, y = "z-score") +
ylim(-2,2) +
coord_flip() +
labs(title = x) +
facet_wrap(~type + measure, nrow = 1) +
theme_bw() +
theme(plot.title = element_text(hjust = .5),
legend.position = "bottom")
}
# wrangle temporal centrality to long form and standardize #
# z standardizes each centrality measure within subject x wave
# (note: scale() returns a one-column matrix, so z is matrix-typed).
centrality <- beta_centrality %>%
gather(key = measure, value = centrality,
Betweenness, Closeness, InStrength, OutStrength) %>%
group_by(SID, wave, measure) %>%
mutate(z = scale(centrality)) %>%
ungroup()
# wrangle contemporaneous centrality to long form and standardize #
# merge with temporal #
centrality_all <- kappa_centrality %>%
gather(key = measure, value = centrality,
Betweenness, Closeness, Strength) %>%
group_by(SID, wave, measure) %>%
mutate(z = scale(centrality)) %>%
ungroup() %>%
full_join(centrality)
# create a function to plot centrality profiles #
# One subject's standardized centrality per node at both waves,
# excluding the Betweenness and Closeness measures.
#   x -- subject ID
#   y -- network type ("Temporal" or "Contemporaneous")
draw.a.plot <- function(x,y){
centrality <- centrality_all %>%
filter(SID == x) %>%
arrange(measure, wave)
centrality %>%
filter(type == y & (measure != "Betweenness" & measure != "Closeness")) %>%
ggplot(aes(x = var, y = z, group = wave))+
geom_line(aes(linetype = wave), color = "black", size = .3) +
geom_point(aes(shape = wave), size = 2) +
labs(x = NULL, y = "z-score", title = sprintf("Subject %s", x),
shape = "Wave", linetype = "Wave") +
ylim(-2.5,2.5) +
geom_hline(aes(yintercept = 0), linetype = "dashed") +
coord_flip() +
facet_wrap(~type + measure, nrow = 1) +
theme_bw() +
theme(plot.title = element_text(hjust = .5, face = "bold"),
axis.text = element_text(face = "bold"),
axis.title = element_text(face = "bold"),
legend.title = element_text(face = "bold"))
}
#set up function to loop through the draw.a.plot() function #
# temporal network centrality #
loop.animate <- function() {
lapply(w1w2_subs[10:30], function(i) {
print(draw.a.plot(i, "Temporal"))
})
}
saveGIF(loop.animate(), interval = .5, movie.name="PDC_centrality.gif", ani.width = 400, ani.height = 350,
imgdir = "~/Box Sync/network/PAIRS/Brown Bag 3.31/")
#set up function to loop through the draw.a.plot() function #
# contemporaneous network centrality #
loop.animate <- function() {
lapply(w1w2_subs[10:30], function(i) {
print(draw.a.plot(i, "Contemporaneous"))
})
}
saveGIF(loop.animate(), interval = .5, movie.name="PCC_centrality.gif", ani.width = 275, ani.height = 350,
imgdir = "~/Box Sync/network/PAIRS/Brown Bag 3.31/")
# relevel network type variable so Temporal facets are ordered before Contemporaneous
# NOTE(review): this relevels `centrality`, but the plots below are built from
# `centrality_all` — confirm whether `centrality_all$type` was the intended target
centrality$type <- factor(centrality$type, levels = c("Temporal", "Contemporaneous"))
# sample temporal centrality plot for subject 90 #
PDC_centralityPlot_90 <- centrality_all %>%
  filter(type == "Temporal" & (measure != "Betweenness" & measure != "Closeness") & SID == "90") %>%
  ggplot(aes(x = var, y = z, group = wave)) +
  geom_line(aes(linetype = wave), color = "black", size = .3) +
  geom_point(aes(shape = wave), size = 2) +
  labs(x = NULL, y = "z-score", linetype = "Wave", shape = "Wave") +
  scale_y_continuous(limits = c(-2.5, 2.5), breaks = seq(-2.5, 2.5, .5)) +
  geom_hline(aes(yintercept = 0), linetype = "dashed") +
  coord_flip() +
  facet_wrap(~type + measure, nrow = 1) +
  theme_bw() +
  theme(axis.text = element_text(face = "bold"),
        axis.title = element_text(face = "bold"),
        legend.title = element_text(face = "bold"))
# pass the plot explicitly: the assignment above does not print the plot, so
# ggsave()'s default of last_plot() would save whatever plot was rendered last
ggsave("~/Box Sync/network/PAIRS/Brown Bag Presentation 3.31/PDC_centrality_plot_90.png",
       plot = PDC_centralityPlot_90, width = 6, height = 5)
# create contemporaneous centrality plot for subject 90 #
PCC_centralityPlot_90 <- centrality_all %>%
  filter(type == "Contemporaneous" & measure == "Strength" & SID == "90") %>%
  ggplot(aes(x = var, y = z, group = wave)) +
  geom_line(aes(linetype = wave), color = "black", size = .3) +
  geom_point(aes(shape = wave), size = 2) +
  labs(x = NULL, y = "z-score", linetype = "Wave", shape = "Wave") +
  scale_y_continuous(limits = c(-2.5, 2.5), breaks = seq(-2.5, 2.5, .5)) +
  geom_hline(aes(yintercept = 0), linetype = "dashed") +
  coord_flip() +
  facet_wrap(~type + measure, nrow = 1) +
  theme_bw() +
  theme(axis.text = element_text(face = "bold"),
        axis.title = element_text(face = "bold"),
        legend.title = element_text(face = "bold"))
# pass the plot explicitly: the assignment above does not print the plot, so
# ggsave()'s default of last_plot() would save whatever plot was rendered last
ggsave("~/Box Sync/network/PAIRS/Brown Bag Presentation 3.31/PCC_centrality_plot_90.png",
       plot = PCC_centralityPlot_90, width = 4, height = 5)
We can also look at the stability of the centrality of specific variables across the waves. That is, do variables maintain roughly the same importance in a subject’s network across the waves?
# save the IDs of subjects measured in both waves to a vector #
subs_w1 <- as.character(unique(kappa_centrality$SID[kappa_centrality$wave == "1"]))
subs_w2 <- as.character(unique(kappa_centrality$SID[kappa_centrality$wave == "2"]))
# both vectors are already unique character IDs, so intersect() gives
# exactly the wave-1 subjects that also appear in wave 2, in wave-1 order
w1w2_subs <- intersect(subs_w1, subs_w2)
# create a simple function to calculate rank-order (Spearman) correlations
# between the wave-1 and wave-2 values of a nested data frame.
# `x` must contain columns `1` and `2` (one row per variable);
# returns a single correlation, or NA when there are < 2 complete pairs.
# Fix: dropped the tidyr::separate() call on `measure3` — the columns it
# created were never used, and it made the function fail on data frames
# without a `measure3` column.
cor_fun <- function(x){
  cor(x$`1`, x$`2`, use = "pairwise", method = "spearman")
}
# wrangle centrality to long form and calculate ranks #
# Ranks are computed across subjects within each measure x variable x network
# type x wave; values and ranks are then spread so wave 1 and wave 2 sit in
# columns named `1` and `2` for the correlation step that follows.
# NOTE(review): tbl_df(), gather(), and spread() are deprecated/superseded in
# current tibble/tidyr — confirm package versions before modernizing.
beta_centrality_rank <- tbl_df(beta_centrality) %>%
# keep only subjects measured at both waves
filter(SID %in% w1w2_subs) %>%
gather(key = measure, value = Centrality, Betweenness:OutStrength) %>%
group_by(measure, var, type, wave) %>%
# rank 1 = most central subject for this measure/variable/type/wave
mutate(rank = min_rank(desc(Centrality))) %>%
ungroup() %>%
# stack raw centralities and ranks so both can be spread by wave at once
gather(key = measure2, value = value, Centrality, rank) %>%
unite(measure3, measure, measure2, remove = F, sep = ".") %>%
spread(key = wave, value = value)
# calculate temporal rank order correlations #
# one Spearman correlation (via cor_fun) per variable/measure/type,
# correlating subjects' wave-1 ranks against their wave-2 ranks
beta_centrality_cor <- beta_centrality_rank %>%
filter(measure2 == "rank") %>%
group_by(var, type, measure, measure2) %>%
nest() %>%
# r is a list-column holding one correlation per group
mutate(r = map(data, cor_fun))
# unnest temporal rank order correlations #
# NOTE(review): unnest(.drop =) is deprecated in tidyr >= 1.0 — confirm version
beta_centrality_cor.df <- beta_centrality_cor %>%
unnest(r, .drop = T)
# create table of average rank order correlations #
# means/SDs are computed in Fisher-z space via meanSD_r2z2r(), then shown
avg_beta_rank_cors <- beta_centrality_cor.df %>%
  group_by(measure2, measure) %>%
  summarize(mean = meanSD_r2z2r(r)[1],
            sd = meanSD_r2z2r(r)[2])
kable(avg_beta_rank_cors,
      caption = "Average Rank Order Centrality Partial Directed Correlation")
# create heatmap of temporal (In/Out Strength) rank order correlations #
# the plot prints at top level, so ggsave() below picks it up as last_plot()
beta_centrality_cor.df %>%
  filter(measure %in% c("InStrength", "OutStrength")) %>%
  select(var, measure, measure2, r) %>%
  mutate(measure2 = "Rank-Order") %>%
  ggplot(aes(x = measure, y = var, fill = r)) +
  geom_raster() +
  geom_text(aes(label = round(r, 2))) +
  scale_fill_gradient2(low = "blue", high = "red", mid = "white",
                       midpoint = 0, limit = c(-.5, .5), space = "Lab",
                       name = "Centrality\nCorrelations") +
  labs(x = NULL, y = NULL) +
  facet_grid(. ~ measure, scale = "free") +
  theme_classic() +
  theme(axis.text = element_text(face = "bold"), legend.position = "none",
        axis.text.x = element_blank())
ggsave("~/Box Sync/network/PAIRS/Brown Bag Presentation 3.31/PDC_centrality_var_cor.png",
       width = 4, height = 4)
# create function to plot profiles of temporal centrality #
# returns a list of two plots for variable x: subjects' In/Out Strength by
# wave, and the same plot with the wave-1 minus wave-2 difference in blue
draw.a.plot <- function(x){
  df <- beta_centrality %>%
    filter(var == x & SID %in% w1w2_subs) %>%
    select(-Betweenness, -Closeness) %>%
    gather(key = type2, value = Centrality, InStrength, OutStrength) %>%
    spread(key = wave, value = Centrality) %>%
    mutate(diff = `1` - `2`) %>%
    gather(key = wave, value = Centrality, `1`, `2`)
  # symmetric y-axis spanning the (rounded-up) largest observed centrality
  y_max <- ceiling(max(df$Centrality, na.rm = T))
  y_step <- y_max / 2
  p <- ggplot(df, aes(x = as.character(SID), y = Centrality, group = wave)) +
    geom_hline(aes(yintercept = 0), linetype = "dashed") +
    geom_line(aes(color = wave), size = 1.2) +
    scale_color_manual(values = c("orchid", "black")) +
    scale_y_continuous(limits = c(-1 * y_max, y_max),
                       breaks = seq(-y_max, y_max, y_step)) +
    labs(x = "Subject", y = "Centrality", color = "Wave",
         title = x) +
    coord_flip() +
    theme_bw() +
    facet_grid(. ~ type2) +
    theme(axis.text.y = element_blank(),
          axis.ticks.y = element_blank(),
          panel.grid.major = element_blank(),
          plot.title = element_text(hjust = .5))
  list(p, p + geom_line(aes(y = diff), color = "blue", size = 1))
}
# set up function to loop through the draw.a.plot() function
loop.animate <- function() {
  lapply(varnames[1:9], function(v) print(draw.a.plot(v)))
}
saveGIF(loop.animate(), interval = .5, movie.name = "PDC_centrality_var.gif",
        ani.width = 400, ani.height = 400)
# create a function to plot ranks of temporal centrality #
# for variable x, draws each subject's In/Out Strength rank at wave 1
# connected to their rank at wave 2 (one line per subject)
draw.a.plot <- function(x){
  df <- beta_centrality %>%
    filter(SID %in% w1w2_subs, var == x) %>%
    select(-Betweenness, -Closeness) %>%
    gather(key = type2, value = Centrality, InStrength, OutStrength) %>%
    group_by(wave, type2, var) %>%
    mutate(rank = dense_rank(desc(Centrality)))
  ggplot(df, aes(x = as.numeric(wave), y = rank, group = factor(SID))) +
    geom_line(aes(color = factor(SID)), size = .3) +
    geom_point(size = .5) +
    scale_x_continuous(limits = c(.95, 2.05), breaks = c(1, 2)) +
    labs(x = "Wave", y = "Rank", title = x) +
    theme_bw() +
    facet_grid(. ~ type2, scale = "free") +
    theme(panel.grid.major = element_blank(),
          axis.text.y = element_blank(),
          axis.ticks.y = element_blank(),
          plot.title = element_text(hjust = .5),
          legend.position = "none")
}
# plot sample ranks plot for N_relaxed, then save the printed plot #
draw.a.plot("N_relaxed")
ggsave("~/Box Sync/network/PAIRS/Brown Bag Presentation 3.31/PDC_centrality_rank.png", width = 5, height = 5)
# loop the rank plots over the first nine variables and animate #
loop.animate <- function() {
  lapply(varnames[1:9], function(v) print(draw.a.plot(v)))
}
saveGIF(loop.animate(), interval = .5, movie.name = "PDC_centrality_rank.gif",
        ani.width = 400, ani.height = 400)
# create ranks and calculate rank order correlations #
# contemporaneous networks: ranks across subjects within each measure x
# variable x type x wave, then one wave-1 vs wave-2 Spearman correlation
# per group via cor_fun
kappa_centrality_cor <- tbl_df(kappa_centrality) %>%
# keep only subjects measured at both waves
filter(SID %in% w1w2_subs) %>%
gather(key = measure, value = Centrality, Betweenness:Strength) %>%
group_by(measure, var, type, wave) %>%
# rank 1 = most central subject within each measure/variable/wave
mutate(rank = min_rank(desc(Centrality))) %>%
ungroup() %>%
gather(key = measure2, value = value, Centrality, rank) %>%
unite(measure3, measure, measure2, remove = F, sep = ".") %>%
spread(key = wave, value = value) %>%
group_by(var, type, measure, measure2) %>%
nest() %>%
# r is a list-column holding one correlation per group
mutate(r = map(data, cor_fun))
# unnest rank order correlations #
# NOTE(review): unnest(.drop =) is deprecated in tidyr >= 1.0 — confirm version
kappa_centrality_cor.df <- kappa_centrality_cor %>%
filter(measure2 == "rank") %>%
unnest(r, .drop = T)
# create a table of average contemporaneous rank order correlations #
# means/SDs are computed in Fisher-z space via meanSD_r2z2r(), then shown
avg_kappa_rank_cors <- kappa_centrality_cor.df %>%
  group_by(measure2, measure) %>%
  summarize(mean = meanSD_r2z2r(r)[1],
            sd = meanSD_r2z2r(r)[2])
kable(avg_kappa_rank_cors,
      caption = "Average Centrality Partial Contemporaneous Correlation")
# create heatmap of contemporaneous rank order centrality correlations #
# Fix: the original piped mutate(measure2 = "Rank-Order") straight into
# filter(measure2 == "Rank-Order") — a vacuous filter that kept every row;
# it has been removed. The plot prints at top level, so ggsave() below
# saves it via last_plot().
kappa_centrality_cor.df %>%
  filter(measure == "Strength") %>%
  # relabel for display (measure2 is otherwise "rank" from the upstream filter)
  mutate(measure2 = "Rank-Order") %>%
  ggplot(aes(x = measure, y = var, fill = r)) +
  geom_raster() +
  scale_fill_gradient2(low = "blue", high = "red", mid = "white",
                       midpoint = 0, limit = c(-.5,.5), space = "Lab",
                       name="Centrality\nCorrelations") +
  geom_text(aes(label = round(r,2))) +
  labs(x = NULL, y = NULL) +
  facet_grid(.~measure) +
  theme_classic() +
  theme(axis.text = element_text(face = "bold"),
        axis.text.x = element_blank(),
        legend.text = element_text(face = "bold"),
        legend.title = element_text(face = "bold"),
        strip.text = element_text(face = "bold"))
ggsave("~/Box Sync/network/PAIRS/Brown Bag Presentation 3.31/PCC_centrality_var_cor.png",
       width = 3.5, height = 4)
# create a function to plot profiles of contemporaneous centrality #
# returns a list of two plots for variable x: subjects' Strength by wave,
# and the same plot with the wave-1 minus wave-2 difference in blue
draw.a.plot <- function(x){
  df <- kappa_centrality %>%
    select(-Betweenness, -Closeness) %>%
    filter(var == x & SID %in% w1w2_subs) %>%
    spread(key = wave, value = Strength) %>%
    mutate(diff = `1` - `2`) %>%
    gather(key = wave, value = Strength, `1`, `2`)
  # symmetric y-axis spanning the (rounded-up) largest observed strength
  y_max <- ceiling(max(df$Strength, na.rm = T))
  y_step <- y_max / 2
  p <- ggplot(df, aes(x = as.character(SID), y = Strength, group = wave)) +
    geom_line(aes(color = wave), size = 1.2) +
    scale_y_continuous(limits = c(-y_max, y_max),
                       breaks = seq(-y_max, y_max, y_step)) +
    scale_color_manual(values = c("orchid", "black")) +
    labs(x = "Subject", y = "Centrality", color = "Wave",
         title = x) +
    coord_flip() +
    theme_bw() +
    theme(axis.text.y = element_blank(),
          axis.ticks.y = element_blank(),
          panel.grid.major = element_blank(),
          plot.title = element_text(hjust = .5))
  list(p, p + geom_line(aes(y = diff), color = "blue", size = 1))
}
# set up function to loop through the draw.a.plot() function
loop.animate <- function() {
  lapply(varnames[1:9], function(v) print(draw.a.plot(v)))
}
saveGIF(loop.animate(), interval = .5, movie.name = "PCC_centrality_var.gif",
        ani.width = 400, ani.height = 400)
# create a function to plot ranks of contemporaneous centrality #
# Fix: this block previously byte-duplicated the temporal rank plot above —
# it read beta_centrality and gathered InStrength/OutStrength even though it
# feeds the PCC_centrality_rank outputs. Contemporaneous ranks should come
# from kappa_centrality's Strength measure. (Confirm against the analysis
# plan — conf: copy-paste error.)
draw.a.plot <- function(x){
  data <- kappa_centrality %>%
    filter(var == x & SID %in% w1w2_subs) %>%
    select(-Betweenness, -Closeness) %>%
    # keep the type2/Centrality long format so the facet call below works
    gather(key = type2, value = Centrality, Strength) %>%
    group_by(wave, type2, var) %>%
    # rank 1 = most central subject within each wave
    mutate(rank = dense_rank(desc(Centrality)))
  ggplot(data, aes(x = as.numeric(wave), y = rank, group = factor(SID))) +
    geom_line(aes(color = factor(SID)), size = .3) +
    geom_point(size = .5) +
    scale_x_continuous(limits = c(.95, 2.05), breaks = c(1,2)) +
    labs(x = "Wave", y = "Rank", title = x) +
    theme_bw() +
    facet_grid(.~type2, scale = "free") +
    theme(panel.grid.major = element_blank(),
          axis.text.y = element_blank(),
          axis.ticks.y = element_blank(),
          plot.title = element_text(hjust = .5),
          legend.position = "none")
}
# plot sample ranks plot for N_relaxed, then save the printed plot #
draw.a.plot("N_relaxed")
ggsave("~/Box Sync/network/PAIRS/Brown Bag Presentation 3.31/PCC_centrality_rank.png", width = 5, height = 5)
# loop the rank plots over the first nine variables and animate #
loop.animate <- function() {
  lapply(varnames[1:9], function(v) print(draw.a.plot(v)))
}
saveGIF(loop.animate(), interval = .5, movie.name = "PCC_centrality_rank.gif",
        ani.width = 400, ani.height = 400)
We can also examine profile correlations of the subjects across waves. That is, for a single subject, does the relative importance of a node in a network remain stable over time? We calculate a correlation for each subject and plot the results in a histogram.
#####Temporal
# calculate each subject's profile correlations of temporal centrality #
# (correlates a subject's wave-1 centrality profile across variables with
# their wave-2 profile, one Pearson r per subject/type/measure)
beta_profile_cors <- tbl_df(beta_centrality) %>%
dplyr::filter(SID %in% w1w2_subs) %>%
gather(key = measure, value = Centrality, Betweenness:OutStrength) %>%
spread(key = wave, value = Centrality) %>%
# sort so the wave columns line up variable-by-variable within subject
arrange(SID, type, measure, var) %>%
group_by(SID, type, measure) %>%
nest() %>%
mutate(r = map(data, ~cor(.$`1`, .$`2`, use = "pairwise")))
# unnest temporal centrality profile correlations #
# NOTE(review): unnest(.drop =) is deprecated in tidyr >= 1.0 — confirm version
beta_profile_cors.df <- beta_profile_cors %>% unnest(r, .drop = T)
# create table of average temporal centrality profile correlations #
# means/SDs are computed in Fisher-z space via meanSD_r2z2r(), then shown
avg_beta_profile_cors <- beta_profile_cors.df %>%
  group_by(type, measure) %>%
  summarize(mean = meanSD_r2z2r(r)[1],
            sd = meanSD_r2z2r(r)[2])
kable(avg_beta_profile_cors,
      caption = "Average Profile Correlation for Partial Directed Correlations")
| type | measure | mean | sd |
|---|---|---|---|
| Temporal | Betweenness | 0.0788584 | 0.3972753 |
| Temporal | Closeness | 0.0836630 | 0.7122792 |
| Temporal | InStrength | 0.3612647 | 0.4960504 |
| Temporal | OutStrength | 0.2386477 | 0.4123103 |
# plot histogram of temporal centrality profile correlations #
# the plot prints at top level, so ggsave() below picks it up as last_plot()
beta_profile_cors.df %>%
  filter(measure == "InStrength" | measure == "OutStrength") %>%
  ggplot(aes(x = r)) +
  geom_histogram(binwidth = .1, color = "black", fill = "gray") +
  labs(x = "Correlation", y = "Frequency") +
  xlim(-1, 1) +
  facet_grid(. ~ measure) +
  theme_bw()
ggsave("~/Box Sync/network/PAIRS/manuscript/R/ipsative_centralty.png", width = 6, height = 4)
# calculate each subject's profile correlations of contemporaneous centrality #
# (correlates a subject's wave-1 centrality profile across variables with
# their wave-2 profile, one Pearson r per subject/type/measure)
kappa_profile_cors <- kappa_centrality %>%
filter(SID %in% w1w2_subs) %>%
gather(key = measure, value = Centrality, Betweenness:Strength) %>%
spread(key = wave, value = Centrality) %>%
# sort so the wave columns line up variable-by-variable within subject
arrange(SID, type, measure, var) %>%
group_by(SID, type, measure) %>%
nest() %>%
mutate(r = map(data, ~cor(.$`1`, .$`2`, use = "pairwise")))
# unnest contemporaneous centrality profile correlations #
# NOTE(review): unnest(.drop =) is deprecated in tidyr >= 1.0 — confirm version
kappa_profile_cors.df <- kappa_profile_cors %>% unnest(r, .drop = T)
# histogram of contemporaneous centrality profile correlations #
kappa_profile_cors.df %>%
  filter(measure == "Strength") %>%
  ggplot(aes(x = r)) +
  geom_histogram(binwidth = .1, color = "black", fill = "gray") +
  facet_wrap(~type) +
  labs(y = "Frequency", x = "Profile Correlation",
       title = "Contemporaneous\nProfile Correlations") +
  theme_bw() +
  theme(plot.title = element_text(hjust = .5))